Lucent paket sadrži i funkcionalnost da za dani channel nađe više od jednog inputa koji ga jako aktivira. Dolje to ilustriramo s par primjera.
pip install --quiet torch-lucent
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
Note: you may need to restart the kernel to use updated packages.
# Lucent (torch-lucent) feature-visualisation utilities.
from lucent.optvis.transform import pad, jitter, random_rotate, random_scale
from lucent.optvis import render, param, transform, objectives
import torch
import numpy as np
# NOTE(review): exact duplicate of the lucent.optvis import two lines up -- harmless, could be removed.
from lucent.optvis import render, param, transform, objectives
# Prefer the first CUDA device when available; otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
import torch.nn as nn
import torch.nn.functional as F
class KittyNet(nn.Module):
    """Small 4-stage CNN (3->9->16->25->36 channels) with average pooling,
    batch-norm after each of the first three pooled conv stages, and a
    4-way linear classifier over the flattened 36*3*3 feature map.
    """

    def __init__(self):
        super().__init__()
        # NOTE: attribute names and registration order are part of the
        # interface -- lucent addresses layers by attribute name, and
        # seeded parameter init depends on registration order.
        self.bn0 = nn.BatchNorm2d(3)
        self.conv1 = nn.Conv2d(3, 9, 3)
        self.pool1 = nn.AvgPool2d(4, 4)
        self.conv1_bn = nn.BatchNorm2d(9)
        self.conv2 = nn.Conv2d(9, 16, 3)
        self.pool2 = nn.AvgPool2d(4, 4)
        self.conv2_bn = nn.BatchNorm2d(16)
        self.conv3 = nn.Conv2d(16, 25, 3)
        self.pool3 = nn.AvgPool2d(4, 4)
        self.conv3_bn = nn.BatchNorm2d(25)
        self.conv4 = nn.Conv2d(25, 36, 3)
        self.pool4 = nn.AvgPool2d(2, 2)
        self.fc = nn.Linear(324, 4)  # 36 channels * 3 * 3 spatial

    def forward(self, x):
        """Apply conv -> ReLU -> avg-pool (-> batch-norm) stages, then classify."""
        x = self.bn0(x)
        stages = (
            (self.conv1, self.pool1, self.conv1_bn),
            (self.conv2, self.pool2, self.conv2_bn),
            (self.conv3, self.pool3, self.conv3_bn),
        )
        for conv, pool, bn in stages:
            x = bn(pool(F.relu(conv(x))))
        # Last stage has no batch-norm.
        x = self.pool4(F.relu(self.conv4(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except batch
        return self.fc(x)
class LongcatNet(nn.Module):
    """Deeper 10-conv CNN (3->9->16->25->36->36->49->...->49 channels) with
    max pooling and a 4-way linear classifier over the flattened 49*6*6 map.

    NOTE(review): the BatchNorm attribute names are offset by one stage
    relative to the conv whose output they normalise -- e.g. ``conv2_bn``
    is applied to ``conv1``'s pooled output, ``conv3_bn`` to ``conv2``'s,
    and so on (see ``forward``). Confusing, but renaming would invalidate
    saved checkpoints, so it is documented here instead.
    """
    def __init__(self):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(3)        # normalises the raw input
        self.conv1 = nn.Conv2d(3, 9, 3)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2_bn = nn.BatchNorm2d(9)   # applied to conv1's output
        self.conv2 = nn.Conv2d(9, 16, 3)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3_bn = nn.BatchNorm2d(16)  # applied to conv2's output
        self.conv3 = nn.Conv2d(16, 25, 3)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.conv4_bn = nn.BatchNorm2d(25)  # applied to conv3's output
        self.conv4 = nn.Conv2d(25, 36, 3)
        self.pool4 = nn.MaxPool2d(2, 2)
        self.conv5_bn = nn.BatchNorm2d(36)  # applied to conv4's output
        self.conv5 = nn.Conv2d(36, 36, 3)
        self.conv6_bn = nn.BatchNorm2d(36)  # applied to conv5's output
        self.conv6 = nn.Conv2d(36, 49, 3)
        self.conv7_bn = nn.BatchNorm2d(49)  # applied to conv6's output
        self.conv7 = nn.Conv2d(49, 49, 3)
        self.conv8_bn = nn.BatchNorm2d(49)  # applied to conv7's output
        self.conv8 = nn.Conv2d(49, 49, 3)
        self.conv9_bn = nn.BatchNorm2d(49)  # applied to conv8's output
        self.conv9 = nn.Conv2d(49, 49, 3)
        self.pool9 = nn.MaxPool2d(2, 2)
        self.conv10_bn = nn.BatchNorm2d(49) # applied to conv9's output
        self.conv10 = nn.Conv2d(49, 49, 3)
        self.pool10 = nn.MaxPool2d(2, 2)
        self.fc = nn.Linear(1764, 4)        # 49 channels * 6 * 6 spatial
    def forward(self, x):
        # Early stages: conv -> ReLU -> max-pool -> (next-numbered) BN.
        x = self.bn1(x)
        x = self.conv2_bn(self.pool1(F.relu(self.conv1(x))))
        x = self.conv3_bn(self.pool2(F.relu(self.conv2(x))))
        x = self.conv4_bn(self.pool3(F.relu(self.conv3(x))))
        x = self.conv5_bn(self.pool4(F.relu(self.conv4(x))))
        # Middle stages keep spatial resolution (no pooling).
        x = self.conv6_bn(F.relu(self.conv5(x)))
        x = self.conv7_bn(F.relu(self.conv6(x)))
        x = self.conv8_bn(F.relu(self.conv7(x)))
        x = self.conv9_bn(F.relu(self.conv8(x)))
        x = self.conv10_bn(self.pool9(F.relu(self.conv9(x))))
        x = self.pool10(F.relu(self.conv10(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except batch
        x = self.fc(x)
        return x
kitty = KittyNet()
longcat = LongcatNet()
# GoogLeNet from torch.hub; the pretrained weights are immediately
# replaced by the fine-tuned checkpoint loaded below.
inception = torch.hub.load('pytorch/vision:v0.10.0', 'googlenet', pretrained=True)
# Load the fine-tuned checkpoints (epoch 7, batch 5000), remapping
# tensors to the chosen device.
kitty.load_state_dict(torch.load('saved_models/kitty/epoch_7_batch_5000.pth', map_location=device))
longcat.load_state_dict(torch.load('saved_models/longcat/epoch_7_batch_5000.pth', map_location=device))
inception.load_state_dict(torch.load('saved_models/inception/epoch_7_batch_5000.pth', map_location=device))
Using cache found in /root/.cache/torch/hub/pytorch_vision_v0.10.0
<All keys matched successfully>
# Move the models to the device and switch to eval mode (fixes
# batch-norm / dropout behaviour, as required for visualisation).
kitty.to(device).eval()
longcat.to(device).eval()
inception.to(device).eval()
GoogLeNet(
(conv1): BasicConv2d(
(conv): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(maxpool1): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=True)
(conv2): BasicConv2d(
(conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(conv3): BasicConv2d(
(conv): Conv2d(64, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(192, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(maxpool2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=True)
(inception3a): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(192, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(96, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(192, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(16, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(32, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(32, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(inception3b): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(192, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(32, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(32, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(96, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(maxpool3): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=True)
(inception4a): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(480, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(192, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(480, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(96, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(96, 208, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(208, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(480, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(16, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(16, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(48, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(480, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(inception4b): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(512, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(160, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(512, 112, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(112, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(112, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(224, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(512, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(24, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(24, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(512, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(inception4c): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(256, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(512, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(24, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(24, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(512, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(inception4d): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(512, 112, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(112, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(512, 144, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(144, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(144, 288, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(288, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(32, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(512, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(inception4e): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(528, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(256, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(528, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(160, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(160, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(320, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(528, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(32, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(32, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(528, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(maxpool4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=True)
(inception5a): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(832, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(256, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(832, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(160, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(160, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(320, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(832, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(32, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(32, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(832, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(inception5b): Inception(
(branch1): BasicConv2d(
(conv): Conv2d(832, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(384, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(branch2): Sequential(
(0): BasicConv2d(
(conv): Conv2d(832, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(192, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(384, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch3): Sequential(
(0): BasicConv2d(
(conv): Conv2d(832, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(48, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicConv2d(
(conv): Conv2d(48, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
(branch4): Sequential(
(0): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=True)
(1): BasicConv2d(
(conv): Conv2d(832, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(aux1): None
(aux2): None
(avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
(dropout): Dropout(p=0.2, inplace=False)
(fc): Linear(in_features=1024, out_features=1000, bias=True)
)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# da bilježnice budu manje:
%config InlineBackend.figure_format = 'jpg'
def lucent_show_layer(model, layer, grid_dim,
                      param_f=None, transforms=None,
                      optimizer=None, image_size=128):
    """Render the first grid_dim**2 channels of `layer` and show them in a
    tight grid_dim x grid_dim grid.

    Parameters
    ----------
    model : torch module (eval mode) to visualise.
    layer : lucent layer name; channel ix is addressed as f"{layer}:{ix}".
    grid_dim : side length of the grid; grid_dim**2 channels are rendered.
    param_f, transforms, optimizer : forwarded to render.render_vis.
    image_size : side length of each rendered (square) image.
    """
    n_row = grid_dim
    n_col = grid_dim
    _, axs = plt.subplots(n_row, n_col, figsize=(19.55, 20))
    # BUGFIX: with grid_dim == 1, plt.subplots returns a bare Axes object
    # that has no .flatten(); np.atleast_1d makes both cases uniform.
    axs = np.atleast_1d(axs).flatten()
    for ix, ax in zip(range(n_row*n_col), axs):
        # BUGFIX: `optimizer` was accepted but never forwarded before.
        img = render.render_vis(model, f"{layer}:{ix}", param_f=param_f,
                                transforms=transforms, optimizer=optimizer,
                                progress=False, show_image=False)[0]
        img = np.reshape(img, (image_size, image_size, 3))
        ax.imshow(img)
        # Strip all tick marks/labels so the images tile seamlessly.
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_xticks([])
        ax.set_yticks([])
        ax.margins(x=0, y=0, tight=True)
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.show()
# Optimise a batch of 4 images jointly so the diversity penalty can push
# the batch members apart, revealing several facets of the same channel.
batch_param_f = lambda: param.image(128, batch=4)
obj = objectives.channel("conv1", 0) - 5e1 * objectives.diversity("conv1")  # kitty conv1 ch 0, diversity weight 5e1
_ = render.render_vis(kitty, obj, batch_param_f, show_inline=True)
0%| | 0/512 [00:00<?, ?it/s]/usr/local/lib/python3.6/dist-packages/lucent/optvis/render.py:104: UserWarning: Some layers could not be computed because the size of the image is not big enough. It is fine, as long as the noncomputed layers are not used in the objective function(exception details: 'Calculated padded input size per channel: (2 x 2). Kernel size: (3 x 3). Kernel size can't be greater than actual input size') "Some layers could not be computed because the size of the " 100%|██████████| 512/512 [00:10<00:00, 47.92it/s]
obj = objectives.channel("conv2", 0) - 1e2 * objectives.diversity("conv2")  # kitty conv2 ch 0, diversity weight 1e2
_ = render.render_vis(kitty, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:10<00:00, 50.01it/s]
obj = objectives.channel("conv3", 0) - 1e3 * objectives.diversity("conv3")  # kitty conv3 ch 0, diversity weight 1e3
_ = render.render_vis(kitty, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:10<00:00, 49.08it/s]
obj = objectives.channel("conv1", 0) - 1e2 * objectives.diversity("conv1")  # longcat conv1 ch 0, diversity weight 1e2
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
0%| | 0/512 [00:00<?, ?it/s]/usr/local/lib/python3.6/dist-packages/lucent/optvis/render.py:104: UserWarning: Some layers could not be computed because the size of the image is not big enough. It is fine, as long as the noncomputed layers are not used in the objective function(exception details: 'Calculated padded input size per channel: (1 x 1). Kernel size: (3 x 3). Kernel size can't be greater than actual input size') "Some layers could not be computed because the size of the " 100%|██████████| 512/512 [00:10<00:00, 48.17it/s]
obj = objectives.channel("conv2", 2) - 1e3 * objectives.diversity("conv2")  # longcat conv2 ch 2, diversity weight 1e3
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:10<00:00, 46.69it/s]
obj = objectives.channel("conv3", 2) - 1e3 * objectives.diversity("conv3")  # longcat conv3 ch 2, diversity weight 1e3
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:11<00:00, 45.24it/s]
obj = objectives.channel("conv4", 2) - 1e3 * objectives.diversity("conv4")  # longcat conv4 ch 2, diversity weight 1e3
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:11<00:00, 43.19it/s]
obj = objectives.channel("conv5", 0) - 1e3 * objectives.diversity("conv5")  # longcat conv5 ch 0, diversity weight 1e3
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:12<00:00, 40.86it/s]
obj = objectives.channel("conv6", 0) - 1e3 * objectives.diversity("conv6")  # longcat conv6 ch 0, diversity weight 1e3
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:12<00:00, 41.69it/s]
obj = objectives.channel("conv7", 1) - 1e3 * objectives.diversity("conv7")  # longcat conv7 ch 1, diversity weight 1e3
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:12<00:00, 41.55it/s]
obj = objectives.channel("conv8", 1) - 1e3 * objectives.diversity("conv8")  # longcat conv8 ch 1, diversity weight 1e3
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:12<00:00, 41.38it/s]
obj = objectives.channel("conv9", 1) - 5e2 * objectives.diversity("conv9")  # longcat conv9 ch 1, diversity weight 5e2
_ = render.render_vis(longcat, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:12<00:00, 40.54it/s]
# Larger 256-px FFT parameterisation for the (deeper) GoogLeNet layers.
batch_param_f = lambda: param.image(256, fft=True, batch=4)
obj = objectives.channel("inception4a", 97) - 1e2 * objectives.diversity("inception4a")  # inception4a ch 97, diversity weight 1e2
_ = render.render_vis(inception, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:16<00:00, 31.03it/s]
# Same 256-px FFT parameterisation as above (re-declared per notebook cell).
batch_param_f = lambda: param.image(256, fft=True, batch=4)
obj = objectives.channel("inception4a", 98) - 1e2 * objectives.diversity("inception4a")  # inception4a ch 98, diversity weight 1e2
_ = render.render_vis(inception, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:16<00:00, 30.90it/s]
batch_param_f = lambda: param.image(256, fft=True, batch=4)
# BUGFIX: the diversity term previously pointed at "inception4a" while the
# channel objective targeted "inception4b"; both must address the same
# layer (every other cell in this notebook pairs them consistently).
obj = objectives.channel("inception4b", 98) - 1e2 * objectives.diversity("inception4b")  # inception4b ch 98, diversity weight 1e2
_ = render.render_vis(inception, obj, batch_param_f, show_inline=True)
100%|██████████| 512/512 [00:17<00:00, 28.55it/s]